return rc;
}
++
++int update_grant_va_mapping_pte(unsigned long pte_addr,
++ l1_pgentry_t _nl1e,
++ struct domain *d,
++ struct vcpu *v)
++{
++ /* Caller must:
++  * . own d's BIGLOCK
++  * . already hold a 'get_page' reference on the frame mapped by the
++  *   to-be-installed nl1e
++  * . be responsible for flushing the TLB
++  * . check the PTE being installed isn't DISALLOWED
++  */
++
++ int rc = GNTST_okay;
++ void *va;
++ unsigned long gpfn, mfn;
++ struct pfn_info *page;
++ struct domain_mmap_cache mapcache, sh_mapcache;
++ u32 type_info;
++ l1_pgentry_t ol1e;
++
++ /* Grant tables and shadow mode don't currently work together. */
++ ASSERT( !shadow_mode_refcounts(d) );
++
++ /* There shouldn't be any strange bits set on the PTE. */
++ ASSERT( (l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
++
++ cleanup_writable_pagetable(d);
++
++ domain_mmap_cache_init(&mapcache);
++ domain_mmap_cache_init(&sh_mapcache);
++
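++ /* Translate the guest frame containing the PTE to a machine frame and take a reference on it. */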
++ gpfn = pte_addr >> PAGE_SHIFT;
++ mfn = __gpfn_to_mfn(d, gpfn);
++
++ if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
++ {
++ MEM_LOG("Could not get page for normal update");
++ rc = -EINVAL;
++ goto failed_norefs;
++ }
++
++ va = map_domain_page_with_cache(mfn, &mapcache);
++ va = (void *)((unsigned long)va +
++ (unsigned long)(pte_addr & ~PAGE_MASK));
++ page = &frame_table[mfn];
++
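++ /* The PTE being updated must live in a page validated as an L1 page table. */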
++ type_info = page->u.inuse.type_info;
++ if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
++ DPRINTK("Grant map attempted to update a non-L1 page\n");
++ rc = -EINVAL;
++ goto failed;
++ }
++
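++ /* Take a type reference so the page cannot be retyped away from L1 while we update it. */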
++ if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
++ {
++ if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
++ sizeof(ol1e)) != 0) ) {
++ put_page_type(page);
++ rc = -EINVAL;
++ goto failed;
++ }
++
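++ /* Atomically replace the old entry with the grant PTE; on success, drop the reference held on the old entry. */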
++ if ( update_l1e(va, ol1e, _nl1e) )
++ {
++ put_page_from_l1e(ol1e, d);
++
++ if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
++ rc = GNTST_flush_all; /* We don't know what vaddr to flush */
++ else
++ rc = GNTST_okay; /* Caller need not invalidate TLB entry */
++
++ if ( unlikely(shadow_mode_enabled(d)) )
++ shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
++ }
++ else
++ rc = -EINVAL;
++
++ put_page_type(page);
++ }
++ else
++ {
++ /* Could not obtain the L1 type reference: fail rather than report success. */
++ rc = -EINVAL;
++ }
++
++ failed:
++ unmap_domain_page_with_cache(va, &mapcache);
++ put_page(page);
++
++ failed_norefs:
++ domain_mmap_cache_destroy(&mapcache);
++ domain_mmap_cache_destroy(&sh_mapcache);
++
++ return rc;
++}
++
++
++
++int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
++ struct domain *d)
++{
++ /* Caller must:
++  * . own d's BIGLOCK
++  * . be responsible for flushing the TLB
++  */
++
++ int rc = GNTST_okay;
++ void *va;
++ unsigned long gpfn, mfn;
++ struct pfn_info *page;
++ struct domain_mmap_cache mapcache, sh_mapcache;
++ u32 type_info;
++ l1_pgentry_t ol1e;
++
++ /* Grant tables and shadow mode don't work together. */
++ ASSERT( !shadow_mode_refcounts(d) );
++
++ cleanup_writable_pagetable(d);
++
++ domain_mmap_cache_init(&mapcache);
++ domain_mmap_cache_init(&sh_mapcache);
++
++ gpfn = addr >> PAGE_SHIFT;
++ mfn = __gpfn_to_mfn(d, gpfn);
++
++ if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
++ {
++ MEM_LOG("Could not get page for normal update");
++ rc = -EINVAL;
++ goto failed_norefs;
++ }
++
++ va = map_domain_page_with_cache(mfn, &mapcache);
++ va = (void *)((unsigned long)va +
++ (unsigned long)(addr & ~PAGE_MASK));
++ page = &frame_table[mfn];
++
++ type_info = page->u.inuse.type_info;
++ if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
++ DPRINTK("Grant map attempted to update a non-L1 page\n");
++ rc = -EINVAL;
++ goto failed;
++ }
++
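++ /* Take a type reference so the page remains an L1 table while we clear the entry. */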
++ if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
++ {
++ if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
++ sizeof(ol1e)) != 0) )
++ {
++ rc = -EINVAL;
++ put_page_type(page);
++ goto failed;
++ }
++
++ /*
++  * Check that the PTE at the supplied address actually maps frame.
++  */
++ if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame ))
++ {
++ DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
++ (unsigned long)l1e_get_intpte(ol1e), addr, frame);
++ rc = -EINVAL;
++ put_page_type(page);
++ goto failed;
++ }
++
++ /* Delete pagetable entry. */
++ if ( unlikely(__put_user(0, (unsigned long *)va)))
++ {
++ DPRINTK("Cannot delete PTE entry at %p.\n", va);
++ rc = -EINVAL;
++ } else {
++ if ( unlikely(shadow_mode_enabled(d)) )
++ shadow_l1_normal_pt_update(d, addr, l1e_empty(),
++ &sh_mapcache);
++ }
++ put_page_type(page);
++ }
++ else
++ {
++ /* Could not obtain the L1 type reference: report the failure. */
++ rc = -EINVAL;
++ }
++
++ failed:
++ unmap_domain_page_with_cache(va, &mapcache);
++ put_page(page);
++
++ failed_norefs:
++ domain_mmap_cache_destroy(&mapcache);
++ domain_mmap_cache_destroy(&sh_mapcache);
++
++ return rc;
++}
++
++
++
/* This function assumes the caller is holding the domain's BIGLOCK
* and is running in a shadow mode
*/
* . check PTE being installed isn't DISALLOWED
*/
-- int rc = 0;
++ int rc = GNTST_okay;
l1_pgentry_t *pl1e;
l1_pgentry_t ol1e;
{
put_page_from_l1e(ol1e, d);
if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
-- rc = 0; /* Caller needs to invalidate TLB entry */
++ rc = GNTST_flush_one;
else
-- rc = 1; /* Caller need not invalidate TLB entry */
++ rc = GNTST_okay; /* Caller need not invalidate TLB entry */
}
else
rc = -EINVAL;
return rc;
}
++int clear_grant_va_mapping(unsigned long addr, unsigned long frame)
++{
++ l1_pgentry_t *pl1e;
++ unsigned long _ol1e;
++
++ pl1e = &linear_pg_table[l1_linear_offset(addr)];
++
++ if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
++ {
++ DPRINTK("Could not find PTE entry for address %lx\n", addr);
++ return -EINVAL;
++ }
++
++ /*
++ * Check that the virtual address supplied is actually mapped to
++ * frame.
++ */
++ if ( unlikely((_ol1e >> PAGE_SHIFT) != frame ))
++ {
++ DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
++ _ol1e, addr, frame);
++ return -EINVAL;
++ }
++
++ /* Delete pagetable entry. */
++ if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
++ {
++ DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
int do_update_va_mapping(unsigned long va, u64 val64,
unsigned long flags)
*
* Copyright (c) 2005 Christopher Clark
* Copyright (c) 2004 K A Fraser
++ * Copyright (c) 2005 Andrew Warfield
++ * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
grant_table_t *t)
{
unsigned int h;
-- if ( unlikely((h = t->maptrack_head) == t->maptrack_limit) )
++ if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
return -1;
t->maptrack_head = t->maptrack[h].ref_and_flags >> MAPTRACK_REF_SHIFT;
t->map_count++;
struct domain *granting_d,
grant_ref_t ref,
u16 dev_hst_ro_flags,
-- unsigned long host_virt_addr,
++ unsigned long addr,
unsigned long *pframe ) /* OUT */
{
domid_t sdom;
sflags = sha->flags;
sdom = sha->domid;
++ /* This loop attempts to set the access (reading/writing) flags
++ * in the grant table entry. It tries a cmpxchg on the field
++ * up to five times, and then fails under the assumption that
++ * the guest is misbehaving. */
for ( ; ; )
{
u32 scombo, prev_scombo, new_scombo;
/*
* At this point:
-- * act->pin updated to reflect mapping.
++ * act->pin updated to reference-count this mapping.
* sha->flags updated to indicate to granting domain mapping done.
* frame contains the mfn.
*/
spin_unlock(&granting_d->grant_table->lock);
-- if ( (host_virt_addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
++
++ if ( (addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
{
/* Write update into the pagetable. */
l1_pgentry_t pte;
pte = l1e_from_pfn(frame, GRANT_PTE_FLAGS);
++
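++ /* GNTMAP_application_map requests a user-accessible mapping, so set _PAGE_USER. */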
++ if ( (dev_hst_ro_flags & GNTMAP_application_map) )
++ l1e_add_flags(pte,_PAGE_USER);
if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
l1e_add_flags(pte,_PAGE_RW);
-- rc = update_grant_va_mapping( host_virt_addr, pte,
-- mapping_d, mapping_ed );
-- /*
-- * IMPORTANT: (rc == 0) => must flush / invalidate entry in TLB.
-- * This is done in the outer gnttab_map_grant_ref.
-- */
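++ /* GNTMAP_contains_pte: 'addr' is the address of the PTE to update rather than a guest virtual address. */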
++ if (!(dev_hst_ro_flags & GNTMAP_contains_pte))
++ {
++ rc = update_grant_va_mapping( addr, pte,
++ mapping_d, mapping_ed );
++ } else {
++ rc = update_grant_va_mapping_pte( addr, pte,
++ mapping_d, mapping_ed );
++ }
++ /* IMPORTANT: rc indicates the degree of TLB flush required:
++  * GNTST_flush_one (1) or GNTST_flush_all (2). The flush itself is
++  * performed by the outer gnttab_map_grant_ref. */
if ( rc < 0 )
{
/* Failure: undo and abort. */
/*
-- * Returns 0 if TLB flush / invalidate required by caller.
-- * va will indicate the address to be invalidated.
++ * Returns a non-negative GNTST_* flush code on success; when a single-entry
++ * flush suffices, va indicates the address to be invalidated.
++ *
++ * addr is _either_ a host virtual address, or the address of the pte to
++ * update, as indicated by the GNTMAP_contains_pte flag.
*/
static int
__gnttab_map_grant_ref(
domid_t dom;
grant_ref_t ref;
struct domain *ld, *rd;
-- struct vcpu *led;
++ struct vcpu *led;
u16 dev_hst_ro_flags;
int handle;
-- unsigned long frame = 0, host_virt_addr;
++ unsigned long frame = 0, addr;
int rc;
led = current;
/* Bitwise-OR avoids short-circuiting which screws control flow. */
if ( unlikely(__get_user(dom, &uop->dom) |
__get_user(ref, &uop->ref) |
-- __get_user(host_virt_addr, &uop->host_virt_addr) |
++ __get_user(addr, &uop->host_virt_addr) |
__get_user(dev_hst_ro_flags, &uop->flags)) )
{
DPRINTK("Fault while reading gnttab_map_grant_ref_t.\n");
return -EFAULT; /* don't set status */
}
--
-- if ( ((host_virt_addr != 0) || (dev_hst_ro_flags & GNTMAP_host_map)) &&
-- unlikely(!__addr_ok(host_virt_addr)))
++ if ( (dev_hst_ro_flags & GNTMAP_host_map) &&
++ ( (addr == 0) ||
++ (!(dev_hst_ro_flags & GNTMAP_contains_pte) &&
++ unlikely(!__addr_ok(addr))) ) )
{
DPRINTK("Bad virtual address (%lx) or flags (%x).\n",
-- host_virt_addr, dev_hst_ro_flags);
++ addr, dev_hst_ro_flags);
(void)__put_user(GNTST_bad_virt_addr, &uop->handle);
return GNTST_bad_gntref;
}
grant_mapping_t *new_mt;
grant_table_t *lgt = ld->grant_table;
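++ /* Do not grow the maptrack table beyond its maximum size. */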
++ if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
++ {
++ put_domain(rd);
++ DPRINTK("Maptrack table is at maximum size.\n");
++ (void)__put_user(GNTST_no_device_space, &uop->handle);
++ return GNTST_no_device_space;
++ }
++
/* Grow the maptrack table. */
new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
if ( new_mt == NULL )
{
put_domain(rd);
-- DPRINTK("No more map handles available\n");
++ DPRINTK("No more map handles available.\n");
(void)__put_user(GNTST_no_device_space, &uop->handle);
return GNTST_no_device_space;
}
lgt->maptrack_order += 1;
lgt->maptrack_limit <<= 1;
-- printk("Doubled maptrack size\n");
++ DPRINTK("Doubled maptrack size\n");
handle = get_maptrack_handle(ld->grant_table);
}
if ( 0 <= ( rc = __gnttab_activate_grant_ref( ld, led, rd, ref,
dev_hst_ro_flags,
-- host_virt_addr, &frame)))
++ addr, &frame)))
{
/*
* Only make the maptrack live _after_ writing the pte, in case we
(void)__put_user(frame, &uop->dev_bus_addr);
-- if ( dev_hst_ro_flags & GNTMAP_host_map )
-- *va = host_virt_addr;
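++ /* Only hand back a virtual address to flush when addr really is a VA. */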
++ if ( ( dev_hst_ro_flags & GNTMAP_host_map ) &&
++ !( dev_hst_ro_flags & GNTMAP_contains_pte) )
++ *va = addr;
(void)__put_user(handle, &uop->handle);
}
gnttab_map_grant_ref(
gnttab_map_grant_ref_t *uop, unsigned int count)
{
-- int i, flush = 0;
++ int i, rc, flush = 0;
unsigned long va = 0;
for ( i = 0; i < count; i++ )
-- if ( __gnttab_map_grant_ref(&uop[i], &va) == 0 )
-- flush++;
++ if ( (rc = __gnttab_map_grant_ref(&uop[i], &va)) >= 0 )
++ flush += rc;
if ( flush == 1 )
flush_tlb_one_mask(current->domain->cpumask, va);
grant_mapping_t *map;
u16 flags;
s16 rc = 1;
-- unsigned long frame, virt;
++ unsigned long frame, addr;
ld = current->domain;
/* Bitwise-OR avoids short-circuiting which screws control flow. */
-- if ( unlikely(__get_user(virt, &uop->host_virt_addr) |
++ if ( unlikely(__get_user(addr, &uop->host_virt_addr) |
__get_user(frame, &uop->dev_bus_addr) |
__get_user(handle, &uop->handle)) )
{
/* Frame is now unmapped for device access. */
}
-- if ( (virt != 0) &&
++ if ( (addr != 0) &&
(flags & GNTMAP_host_map) &&
((act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask)) > 0))
{
-- l1_pgentry_t *pl1e;
-- unsigned long _ol1e;
--
-- pl1e = &linear_pg_table[l1_linear_offset(virt)];
--
-- if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
-- {
-- DPRINTK("Could not find PTE entry for address %lx\n", virt);
-- rc = -EINVAL;
-- goto unmap_out;
-- }
--
-- /*
-- * Check that the virtual address supplied is actually mapped to
-- * act->frame.
-- */
-- if ( unlikely((_ol1e >> PAGE_SHIFT) != frame ))
-- {
-- DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
-- _ol1e, virt, frame);
-- rc = -EINVAL;
-- goto unmap_out;
-- }
--
-- /* Delete pagetable entry. */
-- if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
++ if (flags & GNTMAP_contains_pte)
{
-- DPRINTK("Cannot delete PTE entry at %p for virtual address %lx\n",
-- pl1e, virt);
-- rc = -EINVAL;
-- goto unmap_out;
++ if ( (rc = clear_grant_va_mapping_pte(addr, frame, ld)) < 0 )
++ goto unmap_out;
++ } else {
++ if ( (rc = clear_grant_va_mapping(addr, frame)) < 0 )
++ goto unmap_out;
}
map->ref_and_flags &= ~GNTMAP_host_map;
}
rc = 0;
-- *va = virt;
++ if ( !( flags & GNTMAP_contains_pte) )
++ *va = addr;
}
if ( (map->ref_and_flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0)
if ( act->pin == 0 )
{
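++ /* Poison the frame of the now-inactive entry so stale uses stand out. */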
++ act->frame = 0xdeadbeef;
clear_bit(_GTF_reading, &sha->flags);
put_page(&frame_table[frame]);
}
{
DPRINTK("Grant: dom (%hu) SHARED (%d) flags:(%hx) "
"dom:(%hu) frame:(%lx)\n",
-- op.dom, i, sha_copy.flags, sha_copy.domid, sha_copy.frame);
++ op.dom, i, sha_copy.flags, sha_copy.domid,
++ (unsigned long) sha_copy.frame);
}
}
gnttab_donate_t *gop = &uop[i];
#if GRANT_DEBUG
printk("gnttab_donate: i=%d mfn=%08x domid=%d gref=%08x\n",
-- i, gop->mfn, gop->domid, gop->handle);
++ i, (unsigned int)gop->mfn, gop->domid, gop->handle);
#endif
page = &frame_table[gop->mfn];
if ( ld->domain_id != 0 )
{
DPRINTK("Foreign unref rd(%d) ld(%d) frm(%x) flgs(%x).\n",
-- rd->domain_id, ld->domain_id, frame, readonly);
++ rd->domain_id, ld->domain_id, (unsigned int)frame, readonly);
}
#endif